--- title: Feature keywords: fastai sidebar: home_sidebar summary: "Module for working with local features: keypoints, detectors, matches, etc." description: "Module for working with local features: keypoints, detectors, matches, etc." nb_path: "nbs/feature.ipynb" ---
{% raw %}
{% endraw %} {% raw %}

laf_from_opencv_kpts[source]

laf_from_opencv_kpts(kpts:List[KeyPoint], mrSize:float=6.0, device:device=device(type='cpu'), with_resp:bool=False)

{% endraw %} {% raw %}

visualize_LAF[source]

visualize_LAF(img, LAF, img_idx=0, color='r', draw_ori=True, **kwargs)

{% endraw %} {% raw %}

opencv_kpts_from_laf[source]

opencv_kpts_from_laf(lafs:Tensor, mrSize:float=1.0, resps:Optional[Tensor]=None)

{% endraw %} {% raw %}
{% endraw %}

Let's detect ORB keypoints and convert them to and from the OpenCV format

{% raw %}
%matplotlib inline

import matplotlib.pyplot as plt
img = cv2.cvtColor(cv2.imread('data/strahov.png'), cv2.COLOR_BGR2RGB)

det = cv2.ORB_create(500)
kps, descs = det.detectAndCompute(img, None)

out_img = cv2.drawKeypoints(img, kps, None, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
plt.imshow(out_img)
libpng warning: iCCP: known incorrect sRGB profile
<matplotlib.image.AxesImage at 0x161c486a0>
{% endraw %} {% raw %}
lafs, r = laf_from_opencv_kpts(kps, 1.0, with_resp=True)
visualize_LAF(K.image_to_tensor(img, False), lafs, 0, 'y', draw_ori=False, figsize=(8,6))
{% endraw %} {% raw %}
kps_back = opencv_kpts_from_laf(lafs, 1.0, r)
out_img = cv2.drawKeypoints(img, kps_back, None, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
plt.imshow(out_img)
<matplotlib.image.AxesImage at 0x164470550>
{% endraw %}

OpenCV uses different conventions for the local feature scale.

E.g. to get an equivalent kornia LAF from ORB keypoints, one should use mrSize = 0.5, while for SIFT -- 6.0. The orientation convention also differs between kornia and OpenCV.

{% raw %}

laf_from_opencv_ORB_kpts[source]

laf_from_opencv_ORB_kpts(kpts:List[KeyPoint], device:device=device(type='cpu'), with_resp:bool=False)

{% endraw %} {% raw %}

laf_from_opencv_SIFT_kpts[source]

laf_from_opencv_SIFT_kpts(kpts:List[KeyPoint], device:device=device(type='cpu'), with_resp:bool=False)

{% endraw %} {% raw %}

opencv_SIFT_kpts_from_laf[source]

opencv_SIFT_kpts_from_laf(lafs, resps:Optional[Tensor]=None)

{% endraw %} {% raw %}

opencv_ORB_kpts_from_laf[source]

opencv_ORB_kpts_from_laf(lafs, resps:Optional[Tensor]=None)

{% endraw %} {% raw %}
{% endraw %} {% raw %}
%matplotlib inline
import cv2
import matplotlib.pyplot as plt
img = cv2.cvtColor(cv2.imread('data/strahov.png'), cv2.COLOR_BGR2RGB)

det = cv2.SIFT_create(500)
kps, descs = det.detectAndCompute(img, None)

out_img = cv2.drawKeypoints(img, kps, None, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
plt.imshow(out_img)
libpng warning: iCCP: known incorrect sRGB profile
<matplotlib.image.AxesImage at 0x1644c2940>
{% endraw %}

The keypoints are small because, unlike for ORB, for SIFT OpenCV draws not the actual regions to be described, but the radius of the detected blobs. Kornia and kornia_moons, unlike OpenCV, show the real description region.

{% raw %}
lafs, r = laf_from_opencv_SIFT_kpts(kps, with_resp=True)
visualize_LAF(K.image_to_tensor(img, False), lafs, 0, 'y', figsize=(8,6))
{% endraw %}

If you want to see an image similar to the OpenCV one, you can scale the LAFs by a factor of 1/6.

{% raw %}
visualize_LAF(K.image_to_tensor(img, False), K.feature.laf.scale_laf(lafs, 1./6.0), 0, 'y', figsize=(8,6))
{% endraw %}

Now let's do the same for matches format

{% raw %}

cv2_matches_from_kornia[source]

cv2_matches_from_kornia(match_dists:Tensor, match_idxs:Tensor)

{% endraw %} {% raw %}

kornia_matches_from_cv2[source]

kornia_matches_from_cv2(cv2_matches, device=device(type='cpu'))

{% endraw %} {% raw %}
{% endraw %} {% raw %}
from torch import allclose
match_dists, match_idxs = K.feature.match_nn(torch.from_numpy(descs).float(),
                                             torch.from_numpy(descs).float())

cv2_matches = cv2_matches_from_kornia(match_dists, match_idxs)
out_img = cv2.drawMatches(img, kps, img, kps, cv2_matches, None, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
plt.figure(figsize=(10,5))
plt.imshow(out_img)

match_dists_back, match_idxs_back = kornia_matches_from_cv2(cv2_matches)

assert(allclose(match_dists_back, match_dists))
assert(allclose(match_idxs_back, match_idxs))
{% endraw %} {% raw %}

to_numpy_image[source]

to_numpy_image(img:Union[str, array, Tensor])

{% endraw %} {% raw %}
{% endraw %} {% raw %}
assert isinstance(to_numpy_image('data/strahov.png'), np.ndarray)
libpng warning: iCCP: known incorrect sRGB profile
{% endraw %} {% raw %}

epilines_to_start_end_points[source]

epilines_to_start_end_points(epi, h, w)

{% endraw %} {% raw %}
{% endraw %} {% raw %}

draw_LAF_matches[source]

draw_LAF_matches(lafs1, lafs2, tent_idxs, img1, img2, inlier_mask=None, draw_dict={'inlier_color': (0.2, 1, 0.2), 'tentative_color': (0.8, 0.8, 0), 'feature_color': (0.2, 0.5, 1), 'vertical': False}, Fm:Optional[array]=None, H:Optional[array]=None, ax:Optional=None)

This function draws LAFs, tentative matches, inliers, epipolar lines (if Fm is provided), and the reprojection of the image 1 corners into image 2 (if H is provided)

{% endraw %} {% raw %}
{% endraw %}

We will visualize ORB features (blue), tentative matches (yellow) and inliers (greenish)

{% raw %}
import numpy as np
det = cv2.ORB_create(100)
img1_fname = 'data/strahov.png'
kps1, descs1 = det.detectAndCompute(cv2.imread(img1_fname,0), None)
lafs1 = laf_from_opencv_ORB_kpts(kps1)
idxs = torch.stack([torch.arange(50),torch.arange(50)], dim=-1)
draw_LAF_matches(lafs1, lafs1, idxs,
                  img1_fname,img1_fname, 
                  [True if i%2 == 0 else False for i in range(len(idxs))],
                    draw_dict={"inlier_color": (0.2, 1, 0.2),
                               "tentative_color": (0.8, 0.8, 0), 
                               "feature_color": (0.2, 0.5, 1),
                              "vertical": False})
libpng warning: iCCP: known incorrect sRGB profile
libpng warning: iCCP: known incorrect sRGB profile
libpng warning: iCCP: known incorrect sRGB profile
{% endraw %}

Now let's try with a fundamental matrix corresponding to a translation. The inliers should lie on horizontal epipolar lines

{% raw %}
Fmat = np.array([[0., 0., 0.],
                 [0, 0, -1],
                [0, 1, 0]])
draw_LAF_matches(lafs1, lafs1, idxs,
                  img1_fname,img1_fname, 
                  [True if i%2 == 0 else False for i in range(len(idxs))],
                    draw_dict={"inlier_color": (0.2, 1, 0.2),
                               "tentative_color": (0.8, 0.8, 0), 
                               "feature_color": (0.2, 0.5, 1),
                              "vertical": True}, Fm = Fmat)
libpng warning: iCCP: known incorrect sRGB profile
libpng warning: iCCP: known incorrect sRGB profile
{% endraw %}

Now we will transform the image, match it, find the homography and visualize it.

{% raw %}
import numpy as np
det = cv2.SIFT_create(100)
img1_fname = 'data/strahov.png'
img1 = cv2.cvtColor(cv2.imread(img1_fname), cv2.COLOR_BGR2RGB)

Hgt = np.array([[0.5, 0.1, 10],
                [-0.1, 0.5, 10],
               [0, 0, 1]])
img2 = cv2.warpPerspective(img1, Hgt, img1.shape[:2][::-1], borderValue=(255,255,255))



kps1, descs1 = det.detectAndCompute(img1, None)
lafs1 = laf_from_opencv_SIFT_kpts(kps1)

kps2, descs2 = det.detectAndCompute(img2, None)
lafs2 = laf_from_opencv_SIFT_kpts(kps2)


match_dists, match_idxs = K.feature.match_snn(torch.from_numpy(descs1).float(),
                                              torch.from_numpy(descs2).float(), 0.98)

H, mask = cv2.findHomography(KF.get_laf_center(lafs1[:,match_idxs[:,0]]).detach().cpu().numpy().reshape(-1,2),
                             KF.get_laf_center(lafs2[:,match_idxs[:,1]]).detach().cpu().numpy().reshape(-1,2),
                             cv2.USAC_MAGSAC, 0.5)

                             
draw_LAF_matches(lafs1, lafs2, match_idxs,
                  img1, img2, 
                  mask,
                  draw_dict={"inlier_color": (0.2, 1, 0.2),
                               "tentative_color": (0.8, 0.8, 0), 
                               "feature_color": None,
                              "vertical": False}, H = H)
libpng warning: iCCP: known incorrect sRGB profile
{% endraw %}

And the same with fundamental matrix

{% raw %}
import numpy as np
det = cv2.SIFT_create(75)
img1_fname = 'data/strahov.png'
img1 = cv2.cvtColor(cv2.imread(img1_fname), cv2.COLOR_BGR2RGB)

Hgt = np.array([[0.75, -0.1, 10],
                [0.1, 0.75, 10],
               [0, 0, 1]])

img2 = cv2.warpPerspective(img1, Hgt, img1.shape[:2][::-1], borderValue=(255,255,255))


kps1, descs1 = det.detectAndCompute(img1, None)
lafs1 = laf_from_opencv_SIFT_kpts(kps1)

kps2, descs2 = det.detectAndCompute(img2, None)
lafs2 = laf_from_opencv_SIFT_kpts(kps2)



match_dists, match_idxs = K.feature.match_snn(torch.from_numpy(descs1).float(),
                                              torch.from_numpy(descs2).float(), 0.95)

Fmat, mask = cv2.findFundamentalMat(KF.get_laf_center(lafs1[:,match_idxs[:,0]]).detach().cpu().numpy().reshape(-1,2),
                             KF.get_laf_center(lafs2[:,match_idxs[:,1]]).detach().cpu().numpy().reshape(-1,2),
                             cv2.USAC_MAGSAC, 0.5)

                             
draw_LAF_matches(lafs1, lafs2, match_idxs,
                  img1, img2, 
                  mask,
                  draw_dict={"inlier_color": None,#(0.2, 1, 0.2),
                               "tentative_color": (0.8, 0.8, 0), 
                               "feature_color": None,
                              "vertical": True})
libpng warning: iCCP: known incorrect sRGB profile
{% endraw %} {% raw %}

draw_LAF_matches_from_result_dict[source]

draw_LAF_matches_from_result_dict(result_dict, img1, img2, draw_dict={'inlier_color': (0.2, 1, 0.2), 'tentative_color': (0.8, 0.8, 0), 'feature_color': (0.2, 0.5, 1), 'vertical': False})

{% endraw %} {% raw %}
{% endraw %} {% raw %}

draw_LAF_inliers_perspective_repjojected[source]

draw_LAF_inliers_perspective_repjojected(lafs1, lafs2, tent_idxs, img1, img2, inlier_mask=None, draw_dict={'inlier_color': (0.2, 1, 0.2), 'reprojected_color': (0.2, 0.5, 1), 'vertical': False}, H:array=None, ax:Optional=None)

This function draws tentative matches and inliers given the homography H

{% endraw %} {% raw %}
{% endraw %} {% raw %}
!wget https://www.robots.ox.ac.uk/~vgg/research/affine/det_eval_files/graf.tar.gz
!tar -xzf graf.tar.gz
--2022-08-24 16:52:50--  https://www.robots.ox.ac.uk/~vgg/research/affine/det_eval_files/graf.tar.gz
Resolving www.robots.ox.ac.uk (www.robots.ox.ac.uk)... 129.67.94.2
Connecting to www.robots.ox.ac.uk (www.robots.ox.ac.uk)|129.67.94.2|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 8414417 (8,0M) [application/x-gzip]
Saving to: ‘graf.tar.gz.4’

graf.tar.gz.4       100%[===================>]   8,02M  5,50MB/s    in 1,5s    

2022-08-24 16:52:51 (5,50 MB/s) - ‘graf.tar.gz.4’ saved [8414417/8414417]

{% endraw %} {% raw %}
import numpy as np
det = cv2.SIFT_create(500)
img1_fname = 'img1.ppm'
img2_fname = 'img4.ppm'


img1 = cv2.cvtColor(cv2.imread(img1_fname), cv2.COLOR_BGR2RGB)
img2 = cv2.cvtColor(cv2.imread(img2_fname), cv2.COLOR_BGR2RGB)



Hgt = np.loadtxt('H1to4p')
img2 = cv2.warpPerspective(img1, Hgt, img1.shape[:2][::-1], borderValue=(255,255,255))



kps1, descs1 = det.detectAndCompute(img1, None)
lafs1 = laf_from_opencv_SIFT_kpts(kps1)
kps2, descs2 = det.detectAndCompute(img2, None)
lafs2 = laf_from_opencv_SIFT_kpts(kps2)


match_dists, match_idxs = K.feature.match_snn(torch.from_numpy(descs1).float(),
                                              torch.from_numpy(descs2).float(), 0.98)

H, mask = cv2.findHomography(KF.get_laf_center(lafs1[:,match_idxs[:,0]]).detach().cpu().numpy().reshape(-1,2),
                             KF.get_laf_center(lafs2[:,match_idxs[:,1]]).detach().cpu().numpy().reshape(-1,2),
                             cv2.USAC_MAGSAC, 0.5)

                             
draw_LAF_inliers_perspective_repjojected(lafs1, lafs2, match_idxs,
                  cv2.cvtColor(cv2.cvtColor(img1,cv2.COLOR_RGB2GRAY), cv2.COLOR_GRAY2RGB),
                  cv2.cvtColor(cv2.cvtColor(img2,cv2.COLOR_RGB2GRAY), cv2.COLOR_GRAY2RGB),
                  mask, H = H)
{% endraw %} {% raw %}

make_keypoints_upright[source]

make_keypoints_upright(kpts)

{% endraw %} {% raw %}

class OpenCVDetectorKornia[source]

OpenCVDetectorKornia(opencv_detector, mrSize:float=6.0, make_upright=False, max_kpts=-1) :: Module

Base class for all neural network modules.

Your models should also subclass this class.

Modules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes::

import torch.nn as nn
import torch.nn.functional as F

class Model(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 20, 5)
        self.conv2 = nn.Conv2d(20, 20, 5)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        return F.relu(self.conv2(x))

Submodules assigned in this way will be registered, and will have their parameters converted too when you call :meth:to, etc.

.. note:: As per the example above, an __init__() call to the parent class must be made before assignment on the child.

:ivar training: Boolean represents whether this module is in training or evaluation mode. :vartype training: bool

{% endraw %} {% raw %}

class OpenCVFeatureKornia[source]

OpenCVFeatureKornia(opencv_detector, mrSize:float=6.0) :: Module

Base class for all neural network modules.

Your models should also subclass this class.

Modules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes::

import torch.nn as nn
import torch.nn.functional as F

class Model(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 20, 5)
        self.conv2 = nn.Conv2d(20, 20, 5)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        return F.relu(self.conv2(x))

Submodules assigned in this way will be registered, and will have their parameters converted too when you call :meth:to, etc.

.. note:: As per the example above, an __init__() call to the parent class must be made before assignment on the child.

:ivar training: Boolean represents whether this module is in training or evaluation mode. :vartype training: bool

{% endraw %} {% raw %}

class OpenCVDetectorWithAffNetKornia[source]

OpenCVDetectorWithAffNetKornia(opencv_detector, make_upright=False, mrSize:float=6.0, max_kpts=-1) :: Module

Base class for all neural network modules.

Your models should also subclass this class.

Modules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes::

import torch.nn as nn
import torch.nn.functional as F

class Model(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 20, 5)
        self.conv2 = nn.Conv2d(20, 20, 5)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        return F.relu(self.conv2(x))

Submodules assigned in this way will be registered, and will have their parameters converted too when you call :meth:to, etc.

.. note:: As per the example above, an __init__() call to the parent class must be made before assignment on the child.

:ivar training: Boolean represents whether this module is in training or evaluation mode. :vartype training: bool

{% endraw %} {% raw %}
{% endraw %} {% raw %}
import matplotlib.pyplot as plt


kornia_cv2dog = OpenCVDetectorKornia(cv2.SIFT_create(500))
kornia_cv2sift = OpenCVFeatureKornia(cv2.SIFT_create(500))


timg = K.image_to_tensor(cv2.cvtColor(cv2.imread('data/strahov.png'), cv2.COLOR_BGR2RGB), False).float()/255.


lafs, r = kornia_cv2dog(timg)
lafs2, r2, descs2 = kornia_cv2sift(timg)


visualize_LAF(timg, lafs, 0, 'y', figsize=(8,6))
libpng warning: iCCP: known incorrect sRGB profile
{% endraw %} {% raw %}
import matplotlib.pyplot as plt


kornia_cv2dogaffnet = OpenCVDetectorWithAffNetKornia(cv2.SIFT_create(500), make_upright=True)


timg = K.image_to_tensor(cv2.cvtColor(cv2.imread('data/strahov.png'), cv2.COLOR_BGR2RGB), False).float()/255.


lafs, r = kornia_cv2dogaffnet(timg)



visualize_LAF(timg, lafs, 0, 'y', figsize=(8,6))
/var/folders/j9/y_61c9h10xz3d5g4d1rrny5c0000gn/T/ipykernel_56062/1100302891.py:68: DeprecationWarning: `LAFAffNetShapeEstimator` default behaviour is changed and now it does preserve original LAF orientation. Make sure your code accounts for this.
  self.affnet = KF.LAFAffNetShapeEstimator(True).eval()
libpng warning: iCCP: known incorrect sRGB profile
{% endraw %}